Power Plants
MISC
Introduction
This data has been gathered at two solar power plants in India over a 34 day period. It has two pairs of files - each pair has one power generation dataset and one sensor readings dataset. The power generation datasets are gathered at the inverter level - each inverter has multiple lines of solar panels attached to it. The sensor data is gathered at a plant level - single array of sensors optimally placed at the plant.
There are a few areas of concern at the solar power plant -
- Can we predict the power generation for next couple of days? - this allows for better grid management
- Can we identify the need for panel cleaning/maintenance?
- Can we identify faulty or sub-optimally performing equipment?
Ideas
- arima / stl modeling
- anomaly detection, boxplots
- related to 2?
Resources / Business Knowledge
DATA DICTIONARY for Power Generation data sets
- AC_POWER : Amount of AC power generated by the inverter (source_key) in this 15 minute interval. Units - kW.
- DC_POWER : Amount of DC power generated by the inverter (source_key) in this 15 minute interval. Units - kW.
- DAILY_YIELD : Daily yield is a cumulative sum of power generated on that day, till that point in time.
- DATE_TIME : Date and time for each observation. Observations recorded at 15 minute intervals.
- PLANT_ID : Plant ID - this will be common for the entire file.
- SOURCE_KEY : Source key in this file stands for the inverter id.
- TOTAL_YIELD : This is the total yield for the inverter till that point in time.
Power Generation Inverter data set: Plant 1
Get and Clean Data
# Load plant 1 power-generation data (15-minute readings, one row per inverter per interval)
p1.gd = read_csv('Plant_1_Generation_Data.csv') %>%
slice_sample(prop = 0.10) %>% #!!<NOTE>temp, working with a sample of dataset for speed purposes
clean_names() %>% #lowercase
select(sort(tidyselect::peek_vars())) %>% #sort cols alphabetically
select(where(is.factor),where(is.character),where(is.numeric)) #sort cols by data type
#OlsonNames()
#https://stackoverflow.com/questions/41479008/what-is-the-correct-tz-database-time-zone-for-india
# Parse date_time (dd-mm-YYYY HH:MM) and convert the inverter id to a factor.
# Bare column names are used inside mutate() (not p1.gd$...) so the expressions
# refer to the piped data, and the timestamp is parsed directly in the plant's
# timezone: the original as.POSIXct(strptime(...), tz=) parsed the wall time in
# the *system* timezone and only relabeled it as Asia/Kolkata.
p1.gd = p1.gd %>% mutate(
  date_time = as.POSIXct(date_time, format = "%d-%m-%Y %H:%M", tz = 'Asia/Kolkata'),
  source_key = factor(source_key)
) %>% rename(inverter = source_key)
# plant_id is constant for the whole file, so it carries no information
p1.gd$plant_id = NULL
glimpse structure, and sample rows
## Rows: 6,877
## Columns: 6
## $ date_time <dttm> 2020-05-18 01:15:00, 2020-05-18 02:00:00, 2020-05-31 1...
## $ inverter <fct> YxYtjZvoooNbGkE, zBIq5rxdHJRwDNY, sjndEbLyjtCKgGv, sjnd...
## $ ac_power <dbl> 0.0000, 0.0000, 542.2500, 934.5375, 0.0000, 0.0000, 178...
## $ daily_yield <dbl> 0.0000, 0.0000, 2367.2500, 1616.5000, 0.0000, 0.0000, 8...
## $ dc_power <dbl> 0.000, 0.000, 5531.125, 9566.750, 0.000, 0.000, 1819.00...
## $ total_yield <dbl> 7200229, 6359657, 7138622, 7067503, 7163262, 7281035, 7...
EDA: Factor Vars
counts each factor’s unique levels
## .
## inverter 22
reference: names of unique levels
## inverter
## 1 YxYtjZvoooNbGkE
## 2 zBIq5rxdHJRwDNY
## 3 sjndEbLyjtCKgGv
## 4 z9Y9gH1T5YWrNuG
## 5 uHbuxQJl8lW7ozc
## 6 VHMLBKoKgIrUVDU
## 7 wCURE6d3bPkepu2
## 8 ZoEaEvLYb1n2sOq
## 9 iCRJl6heRkivqQ3
## 10 1IF53ai7Xc0U56Y
## 11 7JYdWkrLSPkdwr4
## 12 3PZuoBAID5Wc2HD
## 13 WRmjgnKYAwPKWDb
## 14 ih0vzX44oOqAx2f
## 15 pkci93gMrogZuBj
## 16 bvBOhCH3iADSZry
## 17 ZnxXDlPa8U1GXgE
## 18 adLQvlD726eNBSB
## 19 zVJPv84UY57bAof
## 20 McdE0feGgRqW7Ca
## 21 1BY6WEcLGh8j5v7
## 22 rGa61gmuvPhdLxV
viz: distribution of level counts
# Build a 22-color palette by interpolating the 8-color 'Dark2' brewer palette
jpal = colorRampPalette(brewer.pal(8,'Dark2'))(22)
# Horizontal bars: observation count per inverter, ordered by count
p1.gd %>% count(inverter) %>% plot_ly(y = ~fct_reorder(inverter,n), x = ~n, color = ~inverter, colors = jpal) %>% add_bars(hoverinfo = 'text', text = ~n) %>% hide_legend() %>% layout(
title = 'Source Key Counts',
xaxis = list(title = ''),
yaxis = list(title = '')
) ## Warning: `arrange_()` is deprecated as of dplyr 0.7.0.
## Please use `arrange()` instead.
## See vignette('programming') for more help
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_warnings()` to see where this warning was generated.
EDA: Numeric Vars
viz bivariate numeric distribution
viz: numeric univariate distributions
# Normality diagnostics for every numeric column. Plant 1 has four numeric
# columns (ac_power, daily_yield, dc_power, total_yield); the original chunk
# hard-coded only the first three, inconsistently with the plant 2 chunk.
names.numeric = p1.gd %>% select(where(is.numeric)) %>% names
p1.gd %>% dlookr::plot_normality(
  names.numeric[1],
  names.numeric[2],
  names.numeric[3],
  names.numeric[4]
)
viz: numeric univariate distributions
viz: distributions by ‘inverter’ factor
# Boxplots of each power metric by inverter, ordered by median value.
# NOTE(review): plant 2's parallel chunk also plots total_yield; consider
# adding it here for consistency.
p1.gd %>% mutate(inverter = fct_reorder(.f = inverter, .x = daily_yield, .fun = median, .desc = TRUE)) %>%
  plot_ly(y = ~inverter, x = ~daily_yield, color = ~inverter, colors = jpal) %>% add_boxplot() %>%
  hide_legend() %>% layout(xaxis = list(title = ''), yaxis = list(title = ''), title = 'Distribution of Daily Yield by Inverter')

p1.gd %>% mutate(inverter = fct_reorder(.f = inverter, .x = ac_power, .fun = median, .desc = TRUE)) %>%
  plot_ly(y = ~inverter, x = ~ac_power, color = ~inverter, colors = jpal) %>% add_boxplot() %>%
  hide_legend() %>% layout(xaxis = list(title = ''), yaxis = list(title = ''), title = 'Distribution of AC Power by Inverter')

p1.gd %>% mutate(inverter = fct_reorder(.f = inverter, .x = dc_power, .fun = median, .desc = TRUE)) %>%
  plot_ly(y = ~inverter, x = ~dc_power, color = ~inverter, colors = jpal) %>% add_boxplot() %>%
  hide_legend() %>% layout(xaxis = list(title = ''), yaxis = list(title = ''), title = 'Distribution of DC Power by Inverter')
viz: ‘Rest?/Maintenance? Days’ Check
## Warning: `cols` is now required when using unnest().
## Please use `cols = c(data)`
## # A tibble: 6,877 x 6
## inverter date_time ac_power daily_yield dc_power total_yield
## <fct> <dttm> <dbl> <dbl> <dbl> <dbl>
## 1 1BY6WEcLGh8j5v7 2020-05-15 01:15:00 0 0 0 6259559
## 2 1BY6WEcLGh8j5v7 2020-05-15 04:15:00 0 0 0 6259559
## 3 1BY6WEcLGh8j5v7 2020-05-15 08:45:00 418. 556. 4257. 6260115.
## 4 1BY6WEcLGh8j5v7 2020-05-15 09:00:00 559. 689. 5707. 6260248.
## 5 1BY6WEcLGh8j5v7 2020-05-15 10:15:00 650. 1308. 6637. 6260867.
## 6 1BY6WEcLGh8j5v7 2020-05-15 14:30:00 532. 4398. 5430. 6263957.
## 7 1BY6WEcLGh8j5v7 2020-05-15 19:30:00 0 5754 0 6265313
## 8 1BY6WEcLGh8j5v7 2020-05-16 02:00:00 0 0 0 6265313
## 9 1BY6WEcLGh8j5v7 2020-05-16 03:30:00 0 0 0 6265313
## 10 1BY6WEcLGh8j5v7 2020-05-16 11:30:00 718. 2639 7335. 6267952
## # ... with 6,867 more rows
# One daily-yield time-series plot per inverter; intervals with zero daily
# yield are drawn in red as candidate rest/maintenance days.
p1.gd.plots = p1.gd %>%
  arrange(date_time) %>%
  group_nest(inverter) %>%
  mutate(
    plots = map2(
      .x = data,
      .y = inverter,
      .f = function(df, inv) {
        ggplot(df, aes(date_time, daily_yield, color = daily_yield == 0, group = 1)) +
          geom_line(size = 1.2) +
          scale_color_manual(values = c('black', 'red')) +
          ggtitle(paste0('Inverter: ', inv))
      }
    )
  )
# Print every per-inverter plot (rendering artifact un-fused from the output)
p1.gd.plots$plots
## [[1]]
##
## [[2]]
##
## [[3]]
##
## [[4]]
##
## [[5]]
##
## [[6]]
##
## [[7]]
##
## [[8]]
##
## [[9]]
##
## [[10]]
##
## [[11]]
##
## [[12]]
##
## [[13]]
##
## [[14]]
##
## [[15]]
##
## [[16]]
##
## [[17]]
##
## [[18]]
##
## [[19]]
##
## [[20]]
##
## [[21]]
##
## [[22]]
EDA: Time Series Viz
Anomaly Plot
library(scales)
library(anomalize)
# anomalize(data, target, method = c("iqr", "gesd"), alpha = 0.05, max_anoms = 0.2, verbose = FALSE)
# alpha: Controls the width of the "normal" range. Lower values are more conservative while higher values are less prone to incorrectly classifying "normal" observations.
# max_anoms: The maximum percent of anomalies permitted to be identified.
# Per-inverter pipeline: decompose daily_yield into trend/season/remainder,
# flag remainder outliers with GESD, then recompose the normal-range bands
# for plotting. group_by(inverter) makes anomalize run independently per group.
p1.gd.anomalize = p1.gd %>% arrange(date_time) %>%
mutate(inverter = fct_reorder(inverter, -daily_yield)) %>%
group_by(inverter) %>%
time_decompose(daily_yield, method = 'twitter', merge = TRUE) %>%
anomalize(remainder, alpha = 0.05, method = 'gesd') %>%
time_recompose()
# Interactive anomaly plot: dots = observations, circles = flagged anomalies,
# ribbon = recomposed normal range.
ggplotly(
  p1.gd.anomalize %>%
    plot_anomalies(
      ncol = 2,
      alpha_dots = 0.5,
      alpha_circles = 0.5,
      size_circles = 2,
      time_recomposed = TRUE,
      alpha_ribbon = 0.05
    ) + scale_y_continuous(labels = comma) +
    labs(x = '', y = 'daily yield')
) %>% layout(showlegend = FALSE)
Power Generation Inverter data set: Plant 2
Get and Clean Data
# Load plant 2 power-generation data (15-minute readings, one row per inverter per interval)
p2.gd = read_csv('Plant_2_Generation_Data.csv') %>%
slice_sample(prop = 0.10) %>% #!!<NOTE>temp, working with a sample of dataset for speed purposes
clean_names() %>% #lowercase
select(sort(tidyselect::peek_vars())) %>% #sort cols alphabetically
# NOTE(review): is.POSIXct() is not base R -- presumably lubridate is attached; confirm
select(where(is.POSIXct), where(is.factor),where(is.character),where(is.numeric)) #sort cols by data type
#OlsonNames()
#https://stackoverflow.com/questions/41479008/what-is-the-correct-tz-database-time-zone-for-india
# Convert the inverter id to a factor; date_time for this file is already
# parsed by read_csv. Bare column name replaces p2.gd$source_key inside
# mutate(), and the original stray trailing comma is removed.
# NOTE(review): unlike plant 1, no timezone is applied here (read_csv parses
# to UTC) -- confirm whether Asia/Kolkata should be forced for consistency.
p2.gd = p2.gd %>% mutate(
  source_key = factor(source_key)
) %>% rename(inverter = source_key)
# plant_id is constant for the whole file, so it carries no information
p2.gd$plant_id = NULL
glimpse structure, and sample rows
## Rows: 6,769
## Columns: 6
## $ date_time <dttm> 2020-05-20 21:30:00, 2020-06-05 22:30:00, 2020-06-02 2...
## $ inverter <fct> Quc1TzYxW2pYoWX, oZ35aAeoifZaQzV, WcxssY2VbP4hApt, rrq4...
## $ ac_power <dbl> 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,...
## $ daily_yield <dbl> 2419.0000, 7708.0000, 3877.0000, 7271.0000, 0.0000, 0.0...
## $ dc_power <dbl> 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,...
## $ total_yield <dbl> 329530672, 1660126361, 181838353, 121118558, 593698089,...
EDA: Factor Vars
counts each factor’s unique levels
## .
## inverter 22
reference: names of unique levels
## inverter
## 1 Quc1TzYxW2pYoWX
## 2 oZ35aAeoifZaQzV
## 3 WcxssY2VbP4hApt
## 4 rrq4fwE8jgrTyWY
## 5 mqwcsP2rE7J0TFp
## 6 V94E5Ben1TlhnDV
## 7 LYwnQax7tkwH5Cb
## 8 PeE6FRyGXUgsRhN
## 9 xoJJ8DcxJEcupym
## 10 vOuJvMaM2sgwLmb
## 11 Mx2yZCDsyf6DPfv
## 12 Qf4GUc1pJu5T6c6
## 13 IQ2d7wF4YD8zU1Q
## 14 q49J1IKaHRwDQnt
## 15 9kRcWv60rDACzjR
## 16 oZZkBaNadn6DNKz
## 17 NgDl19wMapZy17u
## 18 4UPUqMRk7TRMgml
## 19 Et9kgGMDl729KT4
## 20 LlT2YUhhzqhg5Sw
## 21 81aHJ1q11NBPMrL
## 22 xMbIugepa2P7lBB
viz: distribution of level counts
# Build a 22-color palette by interpolating the 8-color 'Dark2' brewer palette
jpal = colorRampPalette(brewer.pal(8,'Dark2'))(22)
# Horizontal bars: observation count per inverter, ordered by count
p2.gd %>% count(inverter) %>% plot_ly(y = ~fct_reorder(inverter,n), x = ~n, color = ~inverter, colors = jpal) %>% add_bars(hoverinfo = 'text', text = ~n) %>% hide_legend() %>% layout(
  title = 'Source Key Counts',
  xaxis = list(title = ''),
  yaxis = list(title = '')
)
EDA: Numeric Vars
viz bivariate numeric distribution
viz: numeric univariate distributions
# Normality diagnostics for every numeric column of the plant 2 data
# (ac_power, daily_yield, dc_power, total_yield).
names.numeric = p2.gd %>% select(where(is.numeric)) %>% names
p2.gd %>% dlookr::plot_normality(
  names.numeric[1],
  names.numeric[2],
  names.numeric[3],
  names.numeric[4]
)
viz: numeric univariate distributions
viz: distributions by ‘inverter’ factor
# Boxplots of each power metric by inverter, ordered by median value.
p2.gd %>% mutate(inverter = fct_reorder(.f = inverter, .x = total_yield, .fun = median, .desc = TRUE)) %>%
  plot_ly(y = ~inverter, x = ~total_yield, color = ~inverter, colors = jpal) %>% add_boxplot() %>%
  hide_legend() %>% layout(xaxis = list(title = ''), yaxis = list(title = ''), title = 'Distribution of Total Yield by Inverter')

p2.gd %>% mutate(inverter = fct_reorder(.f = inverter, .x = daily_yield, .fun = median, .desc = TRUE)) %>%
  plot_ly(y = ~inverter, x = ~daily_yield, color = ~inverter, colors = jpal) %>% add_boxplot() %>%
  hide_legend() %>% layout(xaxis = list(title = ''), yaxis = list(title = ''), title = 'Distribution of Daily Yield by Inverter')

p2.gd %>% mutate(inverter = fct_reorder(.f = inverter, .x = ac_power, .fun = median, .desc = TRUE)) %>%
  plot_ly(y = ~inverter, x = ~ac_power, color = ~inverter, colors = jpal) %>% add_boxplot() %>%
  hide_legend() %>% layout(xaxis = list(title = ''), yaxis = list(title = ''), title = 'Distribution of AC Power by Inverter')

p2.gd %>% mutate(inverter = fct_reorder(.f = inverter, .x = dc_power, .fun = median, .desc = TRUE)) %>%
  plot_ly(y = ~inverter, x = ~dc_power, color = ~inverter, colors = jpal) %>% add_boxplot() %>%
  hide_legend() %>% layout(xaxis = list(title = ''), yaxis = list(title = ''), title = 'Distribution of DC Power by Inverter')
viz: ‘Rest?/Maintenance? Days’ Check
## Warning: `cols` is now required when using unnest().
## Please use `cols = c(data)`
## # A tibble: 6,769 x 6
## inverter date_time ac_power daily_yield dc_power total_yield
## <fct> <dttm> <dbl> <dbl> <dbl> <dbl>
## 1 4UPUqMRk7TRMgml 2020-05-15 01:00:00 0 7540. 0 2429011
## 2 4UPUqMRk7TRMgml 2020-05-15 01:15:00 0 0 0 2429011
## 3 4UPUqMRk7TRMgml 2020-05-15 02:00:00 0 0 0 2429011
## 4 4UPUqMRk7TRMgml 2020-05-15 02:30:00 0 0 0 2429011
## 5 4UPUqMRk7TRMgml 2020-05-15 06:15:00 26.5 6.13 27.4 2429017.
## 6 4UPUqMRk7TRMgml 2020-05-15 10:15:00 0 2171 0 2431182
## 7 4UPUqMRk7TRMgml 2020-05-15 10:30:00 0 2171 0 2431182
## 8 4UPUqMRk7TRMgml 2020-05-15 12:00:00 0 2171 0 2431182
## 9 4UPUqMRk7TRMgml 2020-05-15 12:30:00 0 2171 0 2431182
## 10 4UPUqMRk7TRMgml 2020-05-15 14:00:00 0 2750 0 2431761
## # ... with 6,759 more rows
# One daily-yield time-series plot per inverter; intervals with zero daily
# yield are drawn in red as candidate rest/maintenance days.
p2.gd.plots = p2.gd %>%
  arrange(date_time) %>%
  group_nest(inverter) %>%
  mutate(
    plots = map2(
      .x = data,
      .y = inverter,
      .f = function(df, inv) {
        ggplot(df, aes(date_time, daily_yield, color = daily_yield == 0, group = 1)) +
          geom_line(size = 1.2) +
          scale_color_manual(values = c('black', 'red')) +
          ggtitle(paste0('Inverter: ', inv))
      }
    )
  )
# Print every per-inverter plot (rendering artifact un-fused from the output)
p2.gd.plots$plots
## [[1]]
##
## [[2]]
##
## [[3]]
##
## [[4]]
##
## [[5]]
##
## [[6]]
##
## [[7]]
##
## [[8]]
##
## [[9]]
##
## [[10]]
##
## [[11]]
##
## [[12]]
##
## [[13]]
##
## [[14]]
##
## [[15]]
##
## [[16]]
##
## [[17]]
##
## [[18]]
##
## [[19]]
##
## [[20]]
##
## [[21]]
##
## [[22]]
EDA: Time Series Viz
Anomaly Plot
library(scales)
library(anomalize)
# anomalize(data, target, method = c("iqr", "gesd"), alpha = 0.05, max_anoms = 0.2, verbose = FALSE)
# alpha: Controls the width of the "normal" range. Lower values are more conservative while higher values are less prone to incorrectly classifying "normal" observations.
# max_anoms: The maximum percent of anomalies permitted to be identified.
# Per-inverter pipeline: decompose daily_yield into trend/season/remainder,
# flag remainder outliers with GESD, then recompose the normal-range bands
# for plotting. group_by(inverter) makes anomalize run independently per group.
p2.gd.anomalize = p2.gd %>% arrange(date_time) %>%
mutate(inverter = fct_reorder(inverter, -daily_yield)) %>%
group_by(inverter) %>%
time_decompose(daily_yield, method = 'twitter', merge = TRUE) %>%
anomalize(remainder, alpha = 0.05, method = 'gesd') %>%
time_recompose()
# Interactive anomaly plot: dots = observations, circles = flagged anomalies,
# ribbon = recomposed normal range.
ggplotly(
  p2.gd.anomalize %>%
    plot_anomalies(
      ncol = 2,
      alpha_dots = 0.5,
      alpha_circles = 0.5,
      size_circles = 2,
      time_recomposed = TRUE,
      alpha_ribbon = 0.05
    ) + scale_y_continuous(labels = comma) +
    labs(x = '', y = 'daily yield')
) %>% layout(showlegend = FALSE)
DATA DICTIONARY for Sensor Reading data sets
- IRRADIATION: Amount of irradiation for the 15 minute interval.
- DATE_TIME: Date and time for each observation. Observations recorded at 15 minute intervals.
- PLANT_ID: Plant ID - this will be common for the entire file.
- SOURCE_KEY: Stands for the sensor panel id. This will be common for the entire file because there’s only one sensor panel for the plant.
- MODULE_TEMPERATURE: There’s a module (solar panel) attached to the sensor panel. This is the temperature reading for that module.
- AMBIENT_TEMPERATURE: This is the ambient temperature at the plant.
Sensor Data Set: Plant 1
Get and Clean Data
# Load plant 1 weather-sensor readings (one sensor array per plant).
# NOTE(review): date_time is parsed by read_csv with no timezone handling,
# unlike the plant 1 generation data -- confirm whether Asia/Kolkata applies.
p1.ws = read_csv('Plant_1_Weather_Sensor_Data.csv') %>%
  slice_sample(prop = 0.10) %>% #!!<NOTE>temp, working with a sample of dataset for speed purposes
  clean_names() %>% #lowercase
  mutate(across(where(is.character),factor)) %>%
  select(sort(tidyselect::peek_vars())) %>% #sort cols alphabetically
  select(date_time, where(is.factor), where(is.numeric)) %>% #sort cols by data type
  arrange(date_time)
# plant_id and source_key are constant for the whole file -- drop them
p1.ws$plant_id = NULL
p1.ws$source_key = NULL
glimpse structure
## Rows: 318
## Columns: 4
## $ date_time <dttm> 2020-05-15 00:15:00, 2020-05-15 04:30:00, 2020...
## $ ambient_temperature <dbl> 25.08459, 24.06262, 24.17711, 25.95908, 27.9883...
## $ irradiation <dbl> 0.00000000, 0.00000000, 0.00000000, 0.34570765,...
## $ module_temperature <dbl> 22.76167, 21.85252, 22.55191, 35.52871, 46.6177...
EDA: Numeric Vars
viz: numeric univariate distributions
viz: numeric univariate outlier check
## # A tibble: 3 x 6
## variables outliers_cnt outliers_ratio outliers_mean with_mean without_mean
## <chr> <int> <dbl> <dbl> <dbl> <dbl>
## 1 ambient_temp~ 0 0 NaN 25.6 25.6
## 2 irradiation 4 1.26 1.10 0.223 0.212
## 3 module_tempe~ 0 0 NaN 31.0 31.0
correlations: viz
p1.ws %>% select(where(is.numeric)) %>% GGally::ggcorr(low = '#990000', mid = '#E0E0E0', high = '#009900', label = TRUE)viz: Time Series w/IQR percentiles
# Each sensor series over time with dashed 25th (red), 50th (blue) and
# 75th (red) percentile reference lines.
ggplotly(p1.ws %>% ggplot(aes(date_time, ambient_temperature)) +
  geom_line(size = 1.2) +
  geom_hline(yintercept = quantile(p1.ws$ambient_temperature, 0.25), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1) +
  geom_hline(yintercept = quantile(p1.ws$ambient_temperature, 0.50), lty = 'dashed', alpha = 0.7, color = 'blue', size = 1.1) +
  geom_hline(yintercept = quantile(p1.ws$ambient_temperature, 0.75), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1)
) %>% layout(title = 'ambient_temperature')

ggplotly(p1.ws %>% ggplot(aes(date_time, irradiation)) +
  geom_line(size = 1.2) +
  geom_hline(yintercept = quantile(p1.ws$irradiation, 0.25), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1) +
  geom_hline(yintercept = quantile(p1.ws$irradiation, 0.50), lty = 'dashed', alpha = 0.7, color = 'blue', size = 1.1) +
  geom_hline(yintercept = quantile(p1.ws$irradiation, 0.75), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1)
) %>% layout(title = 'irradiation')

ggplotly(p1.ws %>% ggplot(aes(date_time, module_temperature)) +
  geom_line(size = 1.2) +
  geom_hline(yintercept = quantile(p1.ws$module_temperature, 0.25), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1) +
  geom_hline(yintercept = quantile(p1.ws$module_temperature, 0.50), lty = 'dashed', alpha = 0.7, color = 'blue', size = 1.1) +
  geom_hline(yintercept = quantile(p1.ws$module_temperature, 0.75), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1)
) %>% layout(title = 'module_temperature')

# All three series stacked in one faceted view with free y scales
p1.ws %>% pivot_longer(ambient_temperature:module_temperature) %>% ggplot(aes(date_time, value, color = name)) + geom_line(size = 1.1) + facet_wrap(~name, ncol = 1, scales = 'free_y') + theme(legend.position = 'none')
Anomaly Plot
library(scales)
library(anomalize)
# anomalize(data, target, method = c("iqr", "gesd"), alpha = 0.05, max_anoms = 0.2, verbose = FALSE)
# alpha: Controls the width of the "normal" range. Lower values are more conservative while higher values are less prone to incorrectly classifying "normal" observations.
# max_anoms: The maximum percent of anomalies permitted to be identified.
# Anomaly overlay per sensor series: decompose, flag remainder outliers via
# GESD, recompose normal-range bands, render interactively. Note the wider
# alpha (0.15) for ambient_temperature vs 0.05 for the other two series.
ggplotly(
  p1.ws %>% arrange(date_time) %>%
    time_decompose(ambient_temperature, method = 'twitter', merge = TRUE) %>%
    anomalize(remainder, alpha = 0.15, method = 'gesd') %>%
    time_recompose() %>%
    plot_anomalies(
      ncol = 2,
      alpha_dots = 0.5,
      alpha_circles = 0.5,
      size_circles = 2,
      time_recomposed = TRUE,
      alpha_ribbon = 0.05
    ) + scale_y_continuous(labels = comma) +
    labs(x = '', y = 'ambient_temperature')
) %>% layout(showlegend = FALSE, title = 'ambient_temperature')

ggplotly(
  p1.ws %>% arrange(date_time) %>%
    time_decompose(irradiation, method = 'twitter', merge = TRUE) %>%
    anomalize(remainder, alpha = 0.05, method = 'gesd') %>%
    time_recompose() %>%
    plot_anomalies(
      ncol = 2,
      alpha_dots = 0.5,
      alpha_circles = 0.5,
      size_circles = 2,
      time_recomposed = TRUE,
      alpha_ribbon = 0.05
    ) + scale_y_continuous(labels = comma) +
    labs(x = '', y = 'irradiation')
) %>% layout(showlegend = FALSE, title = 'irradiation')

ggplotly(
  p1.ws %>% arrange(date_time) %>%
    time_decompose(module_temperature, method = 'twitter', merge = TRUE) %>%
    anomalize(remainder, alpha = 0.05, method = 'gesd') %>%
    time_recompose() %>%
    plot_anomalies(
      ncol = 2,
      alpha_dots = 0.5,
      alpha_circles = 0.5,
      size_circles = 2,
      time_recomposed = TRUE,
      alpha_ribbon = 0.05
    ) + scale_y_continuous(labels = comma) +
    labs(x = '', y = 'module_temperature')
) %>% layout(showlegend = FALSE, title = 'module_temperature')
Sensor Data Set: Plant 2
Get and Clean Data
# Load plant 2 weather-sensor readings (one sensor array per plant).
# NOTE(review): date_time is parsed by read_csv with no timezone handling --
# confirm whether Asia/Kolkata applies.
p2.ws = read_csv('Plant_2_Weather_Sensor_Data.csv') %>%
  slice_sample(prop = 0.10) %>% #!!<NOTE>temp, working with a sample of dataset for speed purposes
  clean_names() %>% #lowercase
  mutate(across(where(is.character),factor)) %>%
  select(sort(tidyselect::peek_vars())) %>% #sort cols alphabetically
  select(date_time, where(is.factor), where(is.numeric)) %>% #sort cols by data type
  arrange(date_time)
# plant_id and source_key are constant for the whole file -- drop them
p2.ws$plant_id = NULL
p2.ws$source_key = NULL
glimpse structure
## Rows: 325
## Columns: 4
## $ date_time <dttm> 2020-05-15 00:15:00, 2020-05-15 01:15:00, 2020...
## $ ambient_temperature <dbl> 26.88081, 26.51274, 25.48205, 29.31902, 34.2518...
## $ irradiation <dbl> 0.000000000, 0.000000000, 0.000000000, 0.625399...
## $ module_temperature <dbl> 24.42187, 25.31797, 24.79669, 40.73804, 54.8943...
EDA: Numeric Vars
viz: numeric univariate distributions
viz: numeric univariate outlier check
## # A tibble: 3 x 6
## variables outliers_cnt outliers_ratio outliers_mean with_mean without_mean
## <chr> <int> <dbl> <dbl> <dbl> <dbl>
## 1 ambient_temp~ 0 0 NaN 28.4 28.4
## 2 irradiation 0 0 NaN 0.249 0.249
## 3 module_tempe~ 0 0 NaN 33.4 33.4
correlations: viz
p2.ws %>% select(where(is.numeric)) %>% GGally::ggcorr(low = '#990000', mid = '#E0E0E0', high = '#009900', label = TRUE)viz: Time Series w/IQR percentiles
# Each sensor series over time with dashed 25th (red), 50th (blue) and
# 75th (red) percentile reference lines.
ggplotly(p2.ws %>% ggplot(aes(date_time, ambient_temperature)) +
  geom_line(size = 1.2) +
  geom_hline(yintercept = quantile(p2.ws$ambient_temperature, 0.25), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1) +
  geom_hline(yintercept = quantile(p2.ws$ambient_temperature, 0.50), lty = 'dashed', alpha = 0.7, color = 'blue', size = 1.1) +
  geom_hline(yintercept = quantile(p2.ws$ambient_temperature, 0.75), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1)
) %>% layout(title = 'ambient_temperature')

ggplotly(p2.ws %>% ggplot(aes(date_time, irradiation)) +
  geom_line(size = 1.2) +
  geom_hline(yintercept = quantile(p2.ws$irradiation, 0.25), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1) +
  geom_hline(yintercept = quantile(p2.ws$irradiation, 0.50), lty = 'dashed', alpha = 0.7, color = 'blue', size = 1.1) +
  geom_hline(yintercept = quantile(p2.ws$irradiation, 0.75), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1)
) %>% layout(title = 'irradiation')

ggplotly(p2.ws %>% ggplot(aes(date_time, module_temperature)) +
  geom_line(size = 1.2) +
  geom_hline(yintercept = quantile(p2.ws$module_temperature, 0.25), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1) +
  geom_hline(yintercept = quantile(p2.ws$module_temperature, 0.50), lty = 'dashed', alpha = 0.7, color = 'blue', size = 1.1) +
  geom_hline(yintercept = quantile(p2.ws$module_temperature, 0.75), lty = 'dashed', alpha = 0.7, color = 'red', size = 1.1)
) %>% layout(title = 'module_temperature')

# All three series stacked in one faceted view with free y scales
p2.ws %>% pivot_longer(ambient_temperature:module_temperature) %>% ggplot(aes(date_time, value, color = name)) + geom_line(size = 1.1) + facet_wrap(~name, ncol = 1, scales = 'free_y') + theme(legend.position = 'none')
Anomaly Plot
library(scales)
library(anomalize)
# anomalize(data, target, method = c("iqr", "gesd"), alpha = 0.05, max_anoms = 0.2, verbose = FALSE)
# alpha: Controls the width of the "normal" range. Lower values are more conservative while higher values are less prone to incorrectly classifying "normal" observations.
# max_anoms: The maximum percent of anomalies permitted to be identified.
# Anomaly overlay per sensor series: decompose, flag remainder outliers via
# GESD, recompose normal-range bands, render interactively. Note the wider
# alpha (0.15) for ambient_temperature vs 0.05 for the other two series.
ggplotly(
  p2.ws %>% arrange(date_time) %>%
    time_decompose(ambient_temperature, method = 'twitter', merge = TRUE) %>%
    anomalize(remainder, alpha = 0.15, method = 'gesd') %>%
    time_recompose() %>%
    plot_anomalies(
      ncol = 2,
      alpha_dots = 0.5,
      alpha_circles = 0.5,
      size_circles = 2,
      time_recomposed = TRUE,
      alpha_ribbon = 0.05
    ) + scale_y_continuous(labels = comma) +
    labs(x = '', y = 'ambient_temperature')
) %>% layout(showlegend = FALSE, title = 'ambient_temperature')

ggplotly(
  p2.ws %>% arrange(date_time) %>%
    time_decompose(irradiation, method = 'twitter', merge = TRUE) %>%
    anomalize(remainder, alpha = 0.05, method = 'gesd') %>%
    time_recompose() %>%
    plot_anomalies(
      ncol = 2,
      alpha_dots = 0.5,
      alpha_circles = 0.5,
      size_circles = 2,
      time_recomposed = TRUE,
      alpha_ribbon = 0.05
    ) + scale_y_continuous(labels = comma) +
    labs(x = '', y = 'irradiation')
) %>% layout(showlegend = FALSE, title = 'irradiation')

ggplotly(
  p2.ws %>% arrange(date_time) %>%
    time_decompose(module_temperature, method = 'twitter', merge = TRUE) %>%
    anomalize(remainder, alpha = 0.05, method = 'gesd') %>%
    time_recompose() %>%
    plot_anomalies(
      ncol = 2,
      alpha_dots = 0.5,
      alpha_circles = 0.5,
      size_circles = 2,
      time_recomposed = TRUE,
      alpha_ribbon = 0.05
    ) + scale_y_continuous(labels = comma) +
    labs(x = '', y = 'module_temperature')
) %>% layout(showlegend = FALSE, title = 'module_temperature')